/* Sync AP's TSC with BSP's. */
v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
- v->domain->arch.hvm_domain.sync_tsc);
+ d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ d->arch.hvm_domain.sync_tsc);
paging_update_paging_modes(v);
delta_tsc = guest_tsc - tsc;
v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
}
#define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
{
v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust - v->arch.hvm_vcpu.msr_tsc_adjust;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
}
/* Sync AP's TSC with BSP's. */
v->arch.hvm_vcpu.cache_tsc_offset =
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
- d->arch.hvm_domain.sync_tsc);
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ d->arch.hvm_domain.sync_tsc);
v->arch.hvm_vcpu.msr_tsc_adjust = 0;
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
}
* will sync their TSC to BSP's sync_tsc.
*/
d->arch.hvm_domain.sync_tsc = rdtsc();
- hvm_funcs.set_tsc_offset(d->vcpu[0],
- d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
- d->arch.hvm_domain.sync_tsc);
+ hvm_set_tsc_offset(d->vcpu[0],
+ d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+ d->arch.hvm_domain.sync_tsc);
}
}
hvm_funcs.cpuid_policy_changed(v);
}
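+/*
+ * Convenience wrapper around the per-vendor hvm_funcs.set_tsc_offset hook.
+ * Callers in this patch pass either the domain's sync_tsc or 0 as at_tsc.
+ */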
+static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
+ uint64_t at_tsc)
+{
+ hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+}
+
/*
* Called to ensure that all guest-specific mappings in a tagged TLB are
* flushed; does *not* flush Xen's TLB entries, and on processors without a